struct domain *d;
unsigned int fp = domctl->u.ioport_permission.first_port;
unsigned int np = domctl->u.ioport_permission.nr_ports;
+ /* Latch allow_access once so the XSM hook and the permit/deny branch
+  * below act on the same value. */
+ int allow = domctl->u.ioport_permission.allow_access;
ret = -EINVAL;
if ( (fp + np) > 65536 )
if ( np == 0 )
ret = 0;
- else if ( domctl->u.ioport_permission.allow_access )
+ /* Consult the security module on the inclusive port range
+  * [fp, fp+np-1] before the iocap rangesets are modified; any XSM
+  * refusal is reported uniformly as -EPERM. */
+ else if ( xsm_ioport_permission(d, fp, fp + np - 1, allow) )
+ ret = -EPERM;
+ else if ( allow )
ret = ioports_permit_access(d, fp, fp + np - 1);
else
ret = ioports_deny_access(d, fp, fp + np - 1);
unsigned long gfn = domctl->u.memory_mapping.first_gfn;
unsigned long mfn = domctl->u.memory_mapping.first_mfn;
unsigned long nr_mfns = domctl->u.memory_mapping.nr_mfns;
+ /* Latch add_mapping so the XSM check and the add/remove paths agree. */
+ int add = domctl->u.memory_mapping.add_mapping;
int i;
ret = -EINVAL;
if ( unlikely((d = rcu_lock_domain_by_id(domctl->domain)) == NULL) )
break;
- ret=0;
- if ( domctl->u.memory_mapping.add_mapping )
+ /* XSM vets the inclusive MFN range before any mapping change; on
+  * denial the RCU domain reference taken above must be dropped. */
+ ret = xsm_iomem_permission(d, mfn, mfn + nr_mfns - 1, add);
+ if ( ret ) {
+ rcu_unlock_domain(d);
+ break;
+ }
+
+ if ( add )
{
gdprintk(XENLOG_INFO,
"memory_map:add: gfn=%lx mfn=%lx nr_mfns=%lx\n",
unsigned int fgp = domctl->u.ioport_mapping.first_gport;
unsigned int fmp = domctl->u.ioport_mapping.first_mport;
unsigned int np = domctl->u.ioport_mapping.nr_ports;
+ /* Latch add_mapping for the XSM check and the branch below. */
+ unsigned int add = domctl->u.ioport_mapping.add_mapping;
struct g2m_ioport *g2m_ioport;
int found = 0;
if ( unlikely((d = rcu_lock_domain_by_id(domctl->domain)) == NULL) )
break;
+ /* Check the machine port range [fmp, fmp+np-1] with the security
+  * module first; drop the RCU domain reference on denial. */
+ ret = xsm_ioport_permission(d, fmp, fmp + np - 1, add);
+ if ( ret ) {
+ rcu_unlock_domain(d);
+ break;
+ }
+
hd = domain_hvm_iommu(d);
- if ( domctl->u.ioport_mapping.add_mapping )
+ if ( add )
{
gdprintk(XENLOG_INFO,
"ioport_map:add f_gport=%x f_mport=%x np=%x\n",
#include <xen/iocap.h>
#include <xen/iommu.h>
#include <xen/trace.h>
+#include <xsm/xsm.h>
#include <asm/msi.h>
#include <asm/current.h>
#include <asm/flushtlb.h>
return 0;
}
+ /* NOTE(review): this check passes the machine irq, while the unmap
+  * path and DOMCTL_irq_permission pass the guest pirq -- confirm
+  * which identifier xsm_irq_permission is meant to receive so the
+  * hooks are consistent. */
+ ret = xsm_irq_permission(d, irq, 1);
+ if ( ret )
+ {
+ dprintk(XENLOG_G_ERR, "dom%d: could not permit access to irq %d mapping to pirq %d\n",
+ d->domain_id, irq, pirq);
+ return ret;
+ }
+
ret = irq_permit_access(d, pirq);
if ( ret )
{
if ( !IS_PRIV_FOR(current->domain, d) )
goto free_domain;
+ /* Revocation (allow=0) also needs the security module's consent;
+  * bail out before taking the pcidevs/event locks. */
+ ret = xsm_irq_permission(d, pirq, 0);
+ if ( ret )
+ goto free_domain;
+
spin_lock(&pcidevs_lock);
spin_lock(&d->event_lock);
ret = unmap_domain_pirq(d, pirq);
{
struct domain *d;
unsigned int pirq = op->u.irq_permission.pirq;
+ /* Latch allow_access for the XSM hook and the branch below. */
+ int allow = op->u.irq_permission.allow_access;
ret = -ESRCH;
d = rcu_lock_domain_by_id(op->domain);
if ( pirq >= d->nr_pirqs )
ret = -EINVAL;
- else if ( op->u.irq_permission.allow_access )
+ /* An XSM refusal is collapsed to -EPERM regardless of the hook's
+  * own error code. */
+ else if ( xsm_irq_permission(d, pirq, allow) )
+ ret = -EPERM;
+ else if ( allow )
ret = irq_permit_access(d, pirq);
else
ret = irq_deny_access(d, pirq);
struct domain *d;
unsigned long mfn = op->u.iomem_permission.first_mfn;
unsigned long nr_mfns = op->u.iomem_permission.nr_mfns;
+ /* Latch allow_access for the XSM hook and the branch below. */
+ int allow = op->u.iomem_permission.allow_access;
ret = -EINVAL;
if ( (mfn + nr_mfns - 1) < mfn ) /* wrap? */
if ( d == NULL )
break;
- if ( op->u.iomem_permission.allow_access )
+ /* The wrap test above has already rejected overflow, so
+  * mfn + nr_mfns - 1 is a valid inclusive upper bound here. */
+ if ( xsm_iomem_permission(d, mfn, mfn + nr_mfns - 1, allow) )
+ ret = -EPERM;
+ else if ( allow )
ret = iomem_permit_access(d, mfn, mfn + nr_mfns - 1);
else
ret = iomem_deny_access(d, mfn, mfn + nr_mfns - 1);
struct range *x, *y;
int rc = 0;
- rc = xsm_add_range(r->domain, r->name, s, e);
- if ( rc )
- return rc;
-
+ /* XSM checks no longer live in the generic rangeset code; callers
+  * now invoke the typed xsm_*_permission hooks before getting here. */
ASSERT(s <= e);
spin_lock(&r->lock);
struct range *x, *y, *t;
int rc = 0;
- rc = xsm_remove_range(r->domain, r->name, s, e);
- if ( rc )
- return rc;
-
+ /* Removal is likewise vetted by the callers via xsm_*_permission. */
ASSERT(s <= e);
spin_lock(&r->lock);
int (*kexec) (void);
int (*schedop_shutdown) (struct domain *d1, struct domain *d2);
- int (*add_range) (struct domain *d, char *name, unsigned long s, unsigned long e);
- int (*remove_range) (struct domain *d, char *name, unsigned long s, unsigned long e);
+ /* Typed resource-permission hooks replacing the string-keyed range
+  * hooks.  'allow' is 1 to grant access and 0 to revoke it; [s, e]
+  * ranges are inclusive of both endpoints. */
+ int (*irq_permission) (struct domain *d, int pirq, uint8_t allow);
+ int (*iomem_permission) (struct domain *d, uint64_t s, uint64_t e, uint8_t allow);
int (*test_assign_device) (uint32_t machine_bdf);
int (*assign_device) (struct domain *d, uint32_t machine_bdf);
int (*pin_mem_cacheattr) (struct domain *d);
int (*ext_vcpucontext) (struct domain *d, uint32_t cmd);
int (*vcpuextstate) (struct domain *d, uint32_t cmd);
+ /* x86-only: inclusive I/O-port range [s, e]. */
+ int (*ioport_permission) (struct domain *d, uint32_t s, uint32_t e, uint8_t allow);
#endif
};
return xsm_call(schedop_shutdown(d1, d2));
}
-static inline int xsm_add_range (struct domain *d, char *name, unsigned long s,
- unsigned long e)
+/* Dispatch to the active module's irq_permission hook (reduces to a
+ * no-op when XSM is compiled out -- see xsm_call()). */
+static inline int xsm_irq_permission (struct domain *d, int pirq, uint8_t allow)
{
- return xsm_call(add_range(d, name, s, e));
+ return xsm_call(irq_permission(d, pirq, allow));
}
-
-static inline int xsm_remove_range (struct domain *d, char *name, unsigned long s,
- unsigned long e)
+
+/* Dispatch to the iomem_permission hook; [s, e] is an inclusive MFN range. */
+static inline int xsm_iomem_permission (struct domain *d, uint64_t s, uint64_t e, uint8_t allow)
{
- return xsm_call(remove_range(d, name, s, e));
+ return xsm_call(iomem_permission(d, s, e, allow));
}
static inline int xsm_test_assign_device(uint32_t machine_bdf)
{
return xsm_call(vcpuextstate(d, cmd));
}
+
+/* x86-only dispatch for the ioport_permission hook; [s, e] is an
+ * inclusive I/O-port range. */
+static inline int xsm_ioport_permission (struct domain *d, uint32_t s, uint32_t e, uint8_t allow)
+{
+ return xsm_call(ioport_permission(d, s, e, allow));
+}
#endif /* CONFIG_X86 */
extern struct xsm_operations dummy_xsm_ops;
return -ENOSYS;
}
-static int dummy_add_range (struct domain *d, char *name, unsigned long s, unsigned long e)
+/* Permissive default: with no XSM module loaded, all IRQ permission
+ * grants and revocations are allowed. */
+static int dummy_irq_permission (struct domain *d, int pirq, uint8_t allow)
{
return 0;
}
-static int dummy_remove_range (struct domain *d, char *name, unsigned long s,
- unsigned long e)
+/* Permissive default for I/O-memory permission changes. */
+static int dummy_iomem_permission (struct domain *d, uint64_t s, uint64_t e, uint8_t allow)
{
return 0;
}
return 0;
}
+/* Permissive default for I/O-port permission changes (x86 only). */
+static int dummy_ioport_permission (struct domain *d, uint32_t s, uint32_t e, uint8_t allow)
+{
+ return 0;
+}
#endif
struct xsm_operations dummy_xsm_ops;
set_to_dummy_if_null(ops, kexec);
set_to_dummy_if_null(ops, schedop_shutdown);
- set_to_dummy_if_null(ops, add_range);
- set_to_dummy_if_null(ops, remove_range);
+ /* Install permissive defaults for any permission hook the module
+  * left unset. */
+ set_to_dummy_if_null(ops, irq_permission);
+ set_to_dummy_if_null(ops, iomem_permission);
set_to_dummy_if_null(ops, __do_xsm_op);
set_to_dummy_if_null(ops, pin_mem_cacheattr);
set_to_dummy_if_null(ops, ext_vcpucontext);
set_to_dummy_if_null(ops, vcpuextstate);
+ set_to_dummy_if_null(ops, ioport_permission);
#endif
}
return RESOURCE__REMOVE;
}
-static int irq_has_perm(struct domain *d, uint8_t pirq, uint8_t access)
+/* FLASK hook for IRQ permission changes.  Widening pirq from uint8_t
+ * to int fixes silent truncation of pirq numbers above 255. */
+static int flask_irq_permission (struct domain *d, int pirq, uint8_t access)
{
u32 perm;
u32 rsid;
return rc;
if ( access )
- return avc_has_perm(tsec->sid, rsid, SECCLASS_RESOURCE,
+ rc = avc_has_perm(tsec->sid, rsid, SECCLASS_RESOURCE,
RESOURCE__USE, &ad);
- else
- return rc;
+ /* On the revoke path rc is returned unchanged here, exactly as the
+  * old 'else return rc;' did -- behaviour is preserved. */
+ return rc;
}
struct iomem_has_perm_data {
return avc_has_perm(data->tsec->sid, sid, SECCLASS_RESOURCE, RESOURCE__USE, &ad);
}
-static int iomem_has_perm(struct domain *d, unsigned long start, unsigned long end, uint8_t access)
+/* Renamed to serve directly as the iomem_permission hook; the range
+ * type is widened to uint64_t to match the hook signature. */
+static int flask_iomem_permission(struct domain *d, uint64_t start, uint64_t end, uint8_t access)
{
struct iomem_has_perm_data data;
int rc;
}
-static int ioport_has_perm(struct domain *d, uint32_t start, uint32_t end, uint8_t access)
+/* Renamed to serve directly as the ioport_permission hook. */
+static int flask_ioport_permission(struct domain *d, uint32_t start, uint32_t end, uint8_t access)
{
int rc;
struct ioport_has_perm_data data;
{
u32 rsid;
int rc = -EPERM;
+ int irq;
struct domain_security_struct *ssec, *tsec;
+ /* Audit record so AVC denial messages identify the irq involved. */
+ struct avc_audit_data ad;
rc = domain_has_perm(current->domain, d, SECCLASS_RESOURCE, RESOURCE__ADD);
if ( rc )
return rc;
- rc = security_pirq_sid(bind->machine_irq, &rsid);
+ /* Map the per-domain pirq to the machine irq (domain_pirq_to_irq)
+  * before looking up the irq's security label -- the old code labelled
+  * the raw guest-supplied value. */
+ irq = domain_pirq_to_irq(d, bind->machine_irq);
+
+ rc = security_pirq_sid(irq, &rsid);
if ( rc )
return rc;
+ AVC_AUDIT_DATA_INIT(&ad, DEV);
+ ad.device = (unsigned long)irq;
+
ssec = current->domain->ssid;
- rc = avc_has_perm(ssec->sid, rsid, SECCLASS_HVM, HVM__BIND_IRQ, NULL);
+ rc = avc_has_perm(ssec->sid, rsid, SECCLASS_HVM, HVM__BIND_IRQ, &ad);
if ( rc )
return rc;
tsec = d->ssid;
- return avc_has_perm(tsec->sid, rsid, SECCLASS_RESOURCE, RESOURCE__USE, NULL);
+ return avc_has_perm(tsec->sid, rsid, SECCLASS_RESOURCE, RESOURCE__USE, &ad);
static int flask_pin_mem_cacheattr (struct domain *d)
}
#endif
-static int io_has_perm(struct domain *d, char *name, unsigned long s,
- unsigned long e, u32 access)
-{
- int rc = -EPERM;
-
- if ( strcmp(name, "I/O Memory") == 0 )
- {
- rc = iomem_has_perm(d, s, e, access);
- if ( rc )
- return rc;
- }
- else if ( strcmp(name, "Interrupts") == 0 )
- {
- while (s <= e) {
- rc = irq_has_perm(d, s, access);
- if ( rc )
- return rc;
- s++;
- }
- }
-#ifdef CONFIG_X86
- else if ( strcmp(name, "I/O Ports") == 0 )
- {
- rc = ioport_has_perm(d, s, e, access);
- if ( rc )
- return rc;
- }
-#endif
-
- return rc;
-}
-
-static int flask_add_range(struct domain *d, char *name, unsigned long s,
- unsigned long e)
-{
- return io_has_perm(d, name, s, e, 1);
-}
-
-static int flask_remove_range(struct domain *d, char *name, unsigned long s,
- unsigned long e)
-{
- return io_has_perm(d, name, s, e, 0);
-}
-
long do_flask_op(XEN_GUEST_HANDLE(xsm_op_t) u_flask_op);
static struct xsm_operations flask_ops = {
.kexec = flask_kexec,
.schedop_shutdown = flask_schedop_shutdown,
- .add_range = flask_add_range,
- .remove_range = flask_remove_range,
+ /* Typed permission hooks replace the old string-keyed range hooks. */
+ .irq_permission = flask_irq_permission,
+ .iomem_permission = flask_iomem_permission,
.__do_xsm_op = do_flask_op,
.pin_mem_cacheattr = flask_pin_mem_cacheattr,
.ext_vcpucontext = flask_ext_vcpucontext,
.vcpuextstate = flask_vcpuextstate,
+ .ioport_permission = flask_ioport_permission,
#endif
};